#include "xg_private.h"
#include "xc_core.h"
+/*
+ * Query the hypervisor (XENMEM_maximum_gpfn) for the maximum guest
+ * pseudo-physical frame number currently in use by domain @domid.
+ *
+ * NOTE(review): the return type is int, so a gpfn above INT_MAX would be
+ * truncated -- confirm this is acceptable for the memory sizes supported.
+ */
+static int max_gpfn(int xc_handle, domid_t domid)
+{
+ return xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
+}
+
+/*
+ * Report whether the domain uses an auto-translated physmap: nonzero
+ * exactly when the domain is HVM (info->hvm is presumably a 0/1 flag --
+ * confirm against the xc_dominfo_t definition).
+ */
int
xc_core_arch_auto_translated_physmap(const xc_dominfo_t *info)
{
- if ( info->hvm )
- return 1;
- return 0;
+ /* HVM guests always run with an auto-translated physmap. */
+ return info->hvm;
}
int
xc_core_memory_map_t **mapp,
unsigned int *nr_entries)
{
- unsigned long max_pfn = live_shinfo->arch.max_pfn;
- xc_core_memory_map_t *map = NULL;
+ unsigned long max_pfn = max_gpfn(xc_handle, info->domid);
+ xc_core_memory_map_t *map;
map = malloc(sizeof(*map));
- if ( !map )
+ if ( map == NULL )
{
PERROR("Could not allocate memory");
- goto out;
+ return -1;
}
map->addr = 0;
*mapp = map;
*nr_entries = 1;
return 0;
-
-out:
- if ( map )
- free(map);
- return -1;
}
int
xen_pfn_t *live_p2m_frame_list_list = NULL;
xen_pfn_t *live_p2m_frame_list = NULL;
uint32_t dom = info->domid;
- unsigned long max_pfn = live_shinfo->arch.max_pfn;
+ unsigned long max_pfn = max_gpfn(xc_handle, info->domid);
int ret = -1;
int err;
/* NB. evtchn_upcall_mask is unused: leave as zero. */
memset(&shared_info->evtchn_mask[0], 0xff,
sizeof(shared_info->evtchn_mask));
- shared_info->arch.max_pfn = page_array[nr_pages - 1];
munmap(shared_info, PAGE_SIZE);
if ( v_end > HVM_BELOW_4G_RAM_END )
{
DECLARE_DOMCTL;
- /* The new domain's shared-info frame number. */
- unsigned long shared_info_frame;
-
/* A copy of the CPU context of the guest. */
vcpu_guest_context_t ctxt;
uint8_t *hvm_buf = NULL;
unsigned long long v_end, memsize;
unsigned long shared_page_nr;
- shared_info_t *shared_info = NULL;
- xen_pfn_t arch_max_pfn;
unsigned long pfn;
unsigned int prev_pc, this_pc;
/* Types of the pfns in the current region */
unsigned long region_pfn_type[MAX_BATCH_SIZE];
- struct xen_add_to_physmap xatp;
-
/* Number of pages of memory the guest has. *Not* the same as max_pfn. */
unsigned long nr_pages;
pfns[i] = i;
for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < pfn_array_size; i++ )
pfns[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
- arch_max_pfn = pfns[max_pfn];/* used later */
/* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */
rc = xc_domain_memory_populate_physmap(
goto out;
}
- /* Shared-info pfn */
- if (!read_exact(io_fd, &(shared_info_frame), sizeof(uint32_t)) ) {
- ERROR("reading the shared-info pfn failed!\n");
- goto out;
- }
- /* Map the shared-info frame where it was before */
- xatp.domid = dom;
- xatp.space = XENMAPSPACE_shared_info;
- xatp.idx = 0;
- xatp.gpfn = shared_info_frame;
- if ( (rc = xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp)) != 0 ) {
- ERROR("setting the shared-info pfn failed!\n");
- goto out;
- }
- if ( (xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp) != 0) ||
- ((shared_info = xc_map_foreign_range(
- xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
- shared_info_frame)) == NULL) )
- goto out;
- /* shared_info.arch.max_pfn is used by dump-core */
- shared_info->arch.max_pfn = arch_max_pfn;
- munmap(shared_info, PAGE_SIZE);
-
rc = 0;
goto out;
/* The size of an array big enough to contain all guest pfns */
unsigned long pfn_array_size;
- /* The new domain's shared-info frame number. */
- unsigned long shared_info_frame;
-
/* Other magic frames: ioreqs and xenstore comms */
unsigned long ioreq_pfn, bufioreq_pfn, store_pfn;
uint32_t hvm_buf_size;
uint8_t *hvm_buf = NULL;
- /* Live mapping of shared info structure */
- shared_info_t *live_shinfo = NULL;
-
/* base of the region in which domain memory is mapped */
unsigned char *region_base = NULL;
ERROR("HVM:Could not get vcpu context");
goto out;
}
- shared_info_frame = info.shared_info_frame;
/* cheesy sanity check */
if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
goto out;
}
- /* Map the shared info frame */
- if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
- PROT_READ, shared_info_frame))) {
- ERROR("HVM:Couldn't map live_shinfo");
- goto out;
- }
-
if ( xc_get_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, &store_pfn)
|| xc_get_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, &ioreq_pfn)
|| xc_get_hvm_param(xc_handle, dom,
DPRINTF("Saving HVM domain memory pages: iter %d 0%%", iter);
- if (last_iter && (max_pfn != live_shinfo->arch.max_pfn)) {
- DPRINTF("calculated max_pfn as %#lx, shinfo says %#lx\n",
- max_pfn, live_shinfo->arch.max_pfn);
- ERROR("Max pfn doesn't match shared info");
- goto out;
- }
-
while( N < pfn_array_size ){
unsigned int this_pc = (N * 100) / pfn_array_size;
goto out;
}
- DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n",
- info.shared_info_frame,
+ DPRINTF("SUSPEND eip %08lx edx %08lx\n",
(unsigned long)ctxt.user_regs.eip,
(unsigned long)ctxt.user_regs.edx);
}
ERROR("write HVM info failed!\n");
}
- /* Shared-info pfn */
- if (!write_exact(io_fd, &(shared_info_frame), sizeof(uint32_t)) ) {
- ERROR("write shared-info pfn failed!\n");
- goto out;
- }
-
/* Success! */
rc = 0;
goto out1;
}
break;
+ case XENMEM_current_reservation:
+ case XENMEM_maximum_reservation:
+ case XENMEM_maximum_gpfn:
+ if ( lock_pages(arg, sizeof(domid_t)) )
+ {
+ PERROR("Could not lock");
+ goto out1;
+ }
+ break;
}
ret = do_xen_hypercall(xc_handle, &hypercall);
case XENMEM_add_to_physmap:
unlock_pages(arg, sizeof(struct xen_add_to_physmap));
break;
+ case XENMEM_current_reservation:
+ case XENMEM_maximum_reservation:
+ case XENMEM_maximum_gpfn:
+ unlock_pages(arg, sizeof(domid_t));
+ break;
}
out1:
#include <asm/x86_emulate.h>
#include <asm/e820.h>
#include <asm/hypercall.h>
+#include <asm/shared.h>
#include <public/memory.h>
#define MEM_LOG(_f, _a...) gdprintk(XENLOG_WARNING , _f "\n" , ## _a)
return 0;
}
+/*
+ * Return the highest guest pseudo-physical frame number in use by @d.
+ *
+ * HVM domains: the p2m table's high-water mark
+ * (d->arch.p2m.max_mapped_pfn).  Other (PV) domains: delegated to
+ * arch_get_max_pfn() -- presumably derived from the guest-maintained
+ * shared_info->arch.max_pfn; verify against that helper's definition.
+ */
+unsigned long domain_get_maximum_gpfn(struct domain *d)
+{
+ return is_hvm_domain(d) ? d->arch.p2m.max_mapped_pfn : arch_get_max_pfn(d);
+}
+
void share_xen_page_with_guest(
struct page_info *page, struct domain *d, int readonly)
{
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/shadow.h>
-#include <asm/shared.h>
#include "private.h"
{
ASSERT(d->arch.paging.shadow.dirty_bitmap == NULL);
d->arch.paging.shadow.dirty_bitmap_size =
- (arch_get_max_pfn(d) + (BITS_PER_LONG - 1)) &
+ (domain_get_maximum_gpfn(d) + (BITS_PER_LONG - 1)) &
~(BITS_PER_LONG - 1);
d->arch.paging.shadow.dirty_bitmap =
xmalloc_array(unsigned long,
d->arch.paging.shadow.dirty_bitmap_size = 0;
return -ENOMEM;
}
- memset(d->arch.paging.shadow.dirty_bitmap, 0, d->arch.paging.shadow.dirty_bitmap_size/8);
+ memset(d->arch.paging.shadow.dirty_bitmap, 0,
+ d->arch.paging.shadow.dirty_bitmap_size/8);
return 0;
}
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
+ case XENMEM_maximum_gpfn:
{
#define xen_domid_t domid_t
#define compat_domid_t domid_compat_t
case XENMEM_maximum_ram_page:
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
+ case XENMEM_maximum_gpfn:
break;
case XENMEM_translate_gpfn_list:
case XENMEM_current_reservation:
case XENMEM_maximum_reservation:
+ case XENMEM_maximum_gpfn:
if ( copy_from_guest(&domid, arg, 1) )
return -EFAULT;
else if ( (d = rcu_lock_domain_by_id(domid)) == NULL )
return -ESRCH;
- rc = (op == XENMEM_current_reservation) ? d->tot_pages : d->max_pages;
+ switch ( op )
+ {
+ case XENMEM_current_reservation:
+ rc = d->tot_pages;
+ break;
+ case XENMEM_maximum_reservation:
+ rc = d->max_pages;
+ break;
+ default:
+ ASSERT(op == XENMEM_maximum_gpfn);
+ rc = domain_get_maximum_gpfn(d);
+ break;
+ }
if ( unlikely(domid != DOMID_SELF) )
rcu_unlock_domain(d);
#define domain_clamp_alloc_bitsize(d, b) (b)
+#define domain_get_maximum_gpfn(d) (-ENOSYS)
+
#endif /* __ASM_IA64_MM_H__ */
#define domain_clamp_alloc_bitsize(d, b) (b)
+#define domain_get_maximum_gpfn(d) (-ENOSYS)
+
#endif
# define domain_clamp_alloc_bitsize(d, b) (b)
#endif
+unsigned long domain_get_maximum_gpfn(struct domain *d);
#endif /* __ASM_X86_MM_H__ */
#define XENMEM_current_reservation 3
#define XENMEM_maximum_reservation 4
+/*
+ * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
+ */
+#define XENMEM_maximum_gpfn 14
+
/*
* Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
* mapping table. Architectures which do not have a m2p table do not implement